tools/internal/xi_vbd_info
tools/internal/xi_vbd_list
xen/drivers/scsi/BusLogic.o
+tools/misc/xen_netwatch
+xen/drivers/scsi/scsi_obsolete.o
+xen/xen
+xen/xen.gz
#define _GNU_SOURCE
#include "dom0_defs.h"
+#define MAX_EXTENTS 32
+#define XEA_SIZE (MAX_EXTENTS * sizeof(xen_extent_t))
int main(int argc, char *argv[])
{
-// block_io_op_t op;
+ block_io_op_t op;
+ unsigned int domain;
+ unsigned short vdevice;
+ xen_extent_t *extents;
+ int i, nextents, ret;
+
+ if( argc != 3) {
+ fprintf(stderr, "Usage: xi_vbd_info domain device\n");
+ return 1;
+ }
+
+ domain = atoi(argv[1]);
+ vdevice = atoi(argv[2]);
+
+    if((extents = malloc(XEA_SIZE)) == NULL) {
+        fprintf(stderr, "out of memory allocating extent buffer\n");
+        return 1;
+    }
+
+ op.cmd = BLOCK_IO_OP_VBD_INFO;
+ op.u.info_params.domain = domain;
+ op.u.info_params.vdevice = vdevice;
+ op.u.info_params.maxextents = MAX_EXTENTS;
+ op.u.info_params.extents = extents;
+ op.u.info_params.nextents = 0;
+ op.u.info_params.mode = 0;
+
+    if(mlock(extents, XEA_SIZE) != 0) {
+        PERROR("Could not lock memory for Xen hypercall");
+        free(extents);
+        return 1;   /* keep exit codes consistent with the usage error above */
+    }
+
+ ret = do_block_io_op(&op);
+
+ (void)munlock(extents, XEA_SIZE);
+
+ if(ret < 0) {
+ fprintf(stderr, "error %d attempting to query VBD %04x for dom %d\n",
+ ret, vdevice, domain);
+ } else {
+
+ nextents = op.u.info_params.nextents;
+ fprintf(stderr, "Domain %d VBD %04x (mode %s) total of %d extents:\n",
+ domain, vdevice, op.u.info_params.mode == 1 ? "read-only"
+ : "read/write", nextents);
+
+ for(i = 0; i < nextents; i++) {
+ fprintf(stderr, "extent %02d: dev %04x start %ld length %ld\n",
+ i, extents[i].device, extents[i].start_sector,
+ extents[i].nr_sectors);
+ }
+
+ }
- // XXX SMH: writeme
return 0;
}
ret = do_block_io_op(&op);
- if(ret < 0)
- fprintf(stderr, "error %d attempting to probe VBDs\n", ret);
-
(void)munlock(xdi->disks, XDA_SIZE);
- for(i = 0; i < xdi->count; i++) {
- fprintf(stderr,
- "Domain %02d %cBD: [R/%c] device %04x capacity %ldkB\n",
- xdi->disks[i].domain, XD_VIRTUAL(xdi->disks[i].info) ? 'V' :
- 'P', XD_READONLY(xdi->disks[i].info) ? 'O' : 'W',
- xdi->disks[i].device, xdi->disks[i].capacity >> 1);
- }
-
+ if(ret < 0) {
+ fprintf(stderr, "error %d attempting to probe VBDs\n", ret);
+ } else {
+ for(i = 0; i < xdi->count; i++)
+ fprintf(stderr,
+ "Domain %02d %cBD: [R/%c] device %04x capacity %ldkB\n",
+ xdi->disks[i].domain, XD_VIRTUAL(xdi->disks[i].info) ?
+ 'V' : 'P', XD_READONLY(xdi->disks[i].info) ? 'O' : 'W',
+ xdi->disks[i].device, xdi->disks[i].capacity >> 1);
+ }
+
return ret;
}
case BLOCK_IO_OP_VBD_INFO:
/* query information about a particular VBD */
ret = vbd_info(&op.u.info_params);
+        /* NOTE(review): propagate a failed copy back to the caller instead of
+           silently dropping it — assumes 'ret' is what this op returns; verify
+           against the surrounding switch (not fully visible in this hunk). */
+        if(ret == 0 && copy_to_user(u_block_io_op, &op, sizeof(op)) != 0)
+            ret = -EFAULT;
break;
default:
}
put_task_struct(p);
+
return 0;
}
long vbd_probe(vbd_probe_t *probe_params)
{
struct task_struct *p = NULL;
- int ret;
+ short putp = 0;
+ int ret = 0;
if(probe_params->domain) {
if(probe_params->domain != VBD_PROBE_ALL) {
p = find_domain_by_id(probe_params->domain);
-
+
if (!p) {
printk("vbd_probe attempted for non-existent domain %d\n",
probe_params->domain);
return -EINVAL;
}
+ putp = 1;
}
} else
/* privileged domains always get access to the 'real' devices */
if((ret = ide_probe_devices(&probe_params->xdi))) {
printk("vbd_probe: error %d in probing ide devices\n", ret);
- return ret;
+ goto out;
}
if((ret = scsi_probe_devices(&probe_params->xdi))) {
printk("vbd_probe: error %d in probing scsi devices\n", ret);
- return ret;
+ goto out;
}
}
printk("vbd_probe: error %d in probing virtual devices\n",
ret);
read_unlock_irqrestore(&tasklist_lock, flags);
- return ret;
+ goto out;
}
}
}
/* probe for disks and VBDs for just 'p' */
if((ret = vbd_probe_devices(&probe_params->xdi, p))) {
printk("vbd_probe: error %d in probing virtual devices\n", ret);
- return ret;
+ goto out;
}
}
+ out:
+ if(putp)
+ put_task_struct(p);
- return 0;
+ return ret;
}
long vbd_info(vbd_info_t *info_params)
{
- return -ENOSYS;
+ struct task_struct *p = NULL;
+ xen_extent_le_t *x;
+ xen_extent_t *extents;
+ vbd_t *v;
+ int h, ret = 0;
+
+ if(info_params->domain != current->domain && !IS_PRIV(current))
+ return -EPERM;
+
+ p = find_domain_by_id(info_params->domain);
+
+ if (!p) {
+ printk("vbd_info attempted for non-existent domain %d\n",
+ info_params->domain);
+ return -EINVAL;
+ }
+
+ h = HSH(info_params->vdevice);
+
+ for(v = p->vbdtab[h]; v; v = v->next)
+ if(v->vdevice == info_params->vdevice)
+ break;
+
+ if(!v) {
+ printk("vbd_info attempted on non-existent VBD.\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ info_params->mode = v->mode;
+ info_params->nextents = 0;
+
+ extents = info_params->extents; // convenience
+
+ for(x = v->extents; x; x = x->next) {
+        /* copy_to_user() returns the number of bytes NOT copied (always
+           >= 0), so a '< 0' test can never fire and a failed copy would be
+           reported as success; treat any non-zero return as -EFAULT. */
+        if(copy_to_user(extents++, &x->extent, sizeof(xen_extent_t)) != 0) {
+            printk("vbd_info: copy_to_user failed\n");
+            ret = -EFAULT;
+            goto out;
+        }
+ info_params->nextents++;
+ }
+
+ out:
+ put_task_struct(p);
+ return ret;
}
} vbd_probe_t;
typedef struct _vbd_info {
+ /* IN variables */
unsigned domain; // domain in question
u16 vdevice; // 16 bit id domain refers to VBD as
- u16 nextents; // max no. of extents to return info for
- xen_extent_t *extents; // pointer to space for list of extents
+ u16 maxextents; // max no. of extents to return info for
+ xen_extent_t *extents; // pointer to space for array of extents
+ /* OUT variables */
+ u16 nextents; // no of extents in the above
+ u16 mode; // VBD_MODE_{READONLY,READWRITE}
} vbd_info_t;
#define XDI_MAX 64
static xen_disk_info_t xlblk_disk_info; /* information about our disks/VBDs */
-#if 0
-static int xlblk_control_msg_pending;
-#endif
-
-
/* We plug the I/O ring if the driver is suspended or if the ring is full. */
#define RING_PLUGGED ((BLK_RING_INC(req_prod) == resp_cons) || \
(state != STATE_ACTIVE))
int xenolinux_block_open(struct inode *inode, struct file *filep)
{
+ short xldev = inode->i_rdev;
+ struct gendisk *gd = xldev_to_gendisk(xldev);
xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev);
+
+ /* Don't allow open if device doesn't exist :-) */
+ if(!gd->part[MINOR(xldev)].nr_sects)
+ return -ENXIO; // no such device
+
disk->usage++;
DPRINTK("xenolinux_block_open\n");
return 0;
switch ( operation )
{
-#if 0
- case XEN_BLOCK_PROBE:
- if ( RING_PLUGGED ) return 1;
- sector_number = 0;
- DISABLE_SCATTERGATHER();
- break;
-#endif
case XEN_BLOCK_READ:
case XEN_BLOCK_WRITE:
+
+ /* Get the appropriate gendisk */
gd = xldev_to_gendisk(device);
+
+ /* Update the sector_number we'll pass down as appropriate; note
+ that we could sanity check that resulting sector will be in
+ this partition, but this will happen in xen anyhow */
sector_number += gd->part[MINOR(device)].start_sect;
+
if ( (sg_operation == operation) &&
(sg_dev == device) &&
(sg_next_sect == sector_number) )
(rw == READ) ? XEN_BLOCK_READ : XEN_BLOCK_WRITE,
bh->b_data, bh->b_rsector, bh->b_size>>9, bh->b_rdev);
- if ( full )
- {
- bh->b_reqnext = next_bh;
- pending_queues[nr_pending++] = rq;
- if ( nr_pending >= MAX_PENDING ) BUG();
- goto out;
+ if(full) {
+
+ bh->b_reqnext = next_bh;
+ pending_queues[nr_pending++] = rq;
+ if ( nr_pending >= MAX_PENDING ) BUG();
+ goto out;
+
}
queued++;
}
break;
-#if 0
- case XEN_BLOCK_PROBE:
- xlblk_control_msg_pending = bret->status;
- break;
-#endif
-
default:
BUG();
}
}
-#if 0
-/* Send a synchronous message to Xen. */
-int xenolinux_control_msg(int operation, char *buffer, int size)
-{
- unsigned long flags;
- char *aligned_buf;
-
- /* We copy from an aligned buffer, as interface needs sector alignment. */
- aligned_buf = (char *)get_free_page(GFP_KERNEL);
- if ( aligned_buf == NULL ) BUG();
- memcpy(aligned_buf, buffer, size);
-
- xlblk_control_msg_pending = 2;
- spin_lock_irqsave(&io_request_lock, flags);
- /* Note that size gets rounded up to a sector-sized boundary. */
- if ( hypervisor_request(0, operation, aligned_buf, 0, (size+511)/512, 0) )
- return -EAGAIN;
- signal_requests_to_xen();
- spin_unlock_irqrestore(&io_request_lock, flags);
- while ( xlblk_control_msg_pending == 2 ) barrier();
-
- memcpy(buffer, aligned_buf, size);
- free_page((unsigned long)aligned_buf);
-
- return xlblk_control_msg_pending ? -EINVAL : 0;
-}
-#endif
-
static void reset_xlblk_interface(void)
{
block_io_op_t op;
-// xlblk_control_msg_pending = 0;
nr_pending = 0;
op.cmd = BLOCK_IO_OP_RESET;